From e896859fb5d59e999c65406db6ce54d140ecd389 Mon Sep 17 00:00:00 2001 From: "djm@kirby.fc.hp.com" Date: Thu, 15 Dec 2005 16:09:19 -0600 Subject: [PATCH] Cleanup VTLB code Signed-off-by: Anthony Xu --- xen/arch/ia64/vmx/vmmu.c | 138 ++++++++++-------------------------- xen/arch/ia64/vmx/vtlb.c | 83 ++++++++++++++-------- xen/include/asm-ia64/vmmu.h | 8 +-- 3 files changed, 97 insertions(+), 132 deletions(-) diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c index 01be4636b7..25761f97c4 100644 --- a/xen/arch/ia64/vmx/vmmu.c +++ b/xen/arch/ia64/vmx/vmmu.c @@ -246,144 +246,84 @@ alloc_pmt(struct domain *d) */ void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb) { -#if 0 - u64 saved_itir, saved_ifa; -#endif - u64 saved_rr; - u64 pages; u64 psr; thash_data_t mtlb; - ia64_rr vrr; unsigned int cl = tlb->cl; mtlb.ifa = tlb->vadr; mtlb.itir = tlb->itir & ~ITIR_RV_MASK; - vrr = vmmu_get_rr(d,mtlb.ifa); //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value); - pages = PSIZE(vrr.ps) >> PAGE_SHIFT; mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK; - mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, pages); + mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1); if (mtlb.ppn == INVALID_MFN) panic("Machine tlb insert with invalid mfn number.\n"); psr = ia64_clear_ic(); -#if 0 - saved_itir = ia64_getreg(_IA64_REG_CR_ITIR); - saved_ifa = ia64_getreg(_IA64_REG_CR_IFA); -#endif - saved_rr = ia64_get_rr(mtlb.ifa); - ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir); - ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa); - /* Only access memory stack which is mapped by TR, - * after rr is switched. 
- */ - ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.rrval)); - ia64_srlz_d(); if ( cl == ISIDE_TLB ) { - ia64_itci(mtlb.page_flags); - ia64_srlz_i(); + ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps); } else { - ia64_itcd(mtlb.page_flags); - ia64_srlz_d(); + ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps); } - ia64_set_rr(mtlb.ifa,saved_rr); - ia64_srlz_d(); -#if 0 - ia64_setreg(_IA64_REG_CR_IFA, saved_ifa); - ia64_setreg(_IA64_REG_CR_ITIR, saved_itir); -#endif ia64_set_psr(psr); ia64_srlz_i(); + return; } +/* + * Purge machine tlb. + * INPUT + * rr: guest rr. + * va: only bits 0:60 is valid + * size: bits format (1<> 3; // set VRN to 0. - // TODO: Set to enforce lazy mode - local_irq_save(psr); + psr = ia64_clear_ic(); ia64_setreg(_IA64_REG_CR_PTA, pta.val); - ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval)); - ia64_srlz_d(); - hash_addr = ia64_thash(va); ia64_setreg(_IA64_REG_CR_PTA, saved_pta); - - ia64_set_rr(0, saved_rr0); - ia64_srlz_d(); ia64_set_psr(psr); + ia64_srlz_i(); return hash_addr; } -u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps) +u64 machine_ttag(PTA pta, u64 va) { - u64 saved_pta, saved_rr0; - u64 hash_addr, tag; - u64 psr; - struct vcpu *v = current; - ia64_rr vrr; +// u64 saved_pta; +// u64 hash_addr, tag; +// u64 psr; +// struct vcpu *v = current; - // TODO: Set to enforce lazy mode - saved_pta = ia64_getreg(_IA64_REG_CR_PTA); - saved_rr0 = ia64_get_rr(0); - vrr.rrval = saved_rr0; - vrr.rid = rid; - vrr.ps = ps; - - va = (va << 3) >> 3; // set VRN to 0. 
- local_irq_save(psr); - ia64_setreg(_IA64_REG_CR_PTA, pta.val); - ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval)); - ia64_srlz_d(); - - tag = ia64_ttag(va); - ia64_setreg(_IA64_REG_CR_PTA, saved_pta); - - ia64_set_rr(0, saved_rr0); - ia64_srlz_d(); - local_irq_restore(psr); - return tag; +// saved_pta = ia64_getreg(_IA64_REG_CR_PTA); +// psr = ia64_clear_ic(); +// ia64_setreg(_IA64_REG_CR_PTA, pta.val); +// tag = ia64_ttag(va); + return ia64_ttag(va); +// ia64_setreg(_IA64_REG_CR_PTA, saved_pta); +// ia64_set_psr(psr); +// ia64_srlz_i(); +// return tag; } -/* - * Purge machine tlb. - * INPUT - * rr: guest rr. - * va: only bits 0:60 is valid - * size: bits format (1<> 3; // set VRN to 0. - saved_rr0 = ia64_get_rr(0); - vrr.rrval = saved_rr0; - vrr.rid = rid; - vrr.ps = ps; - local_irq_save(psr); - ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.rrval) ); - ia64_srlz_d(); - ia64_ptcl(va, ps << 2); - ia64_set_rr( 0, saved_rr0 ); - ia64_srlz_d(); - local_irq_restore(psr); -} int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref) diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c index d6ae1a6220..a95d203fec 100644 --- a/xen/arch/ia64/vmx/vtlb.c +++ b/xen/arch/ia64/vmx/vtlb.c @@ -68,8 +68,7 @@ static void cch_free(thash_cb_t *hcb, thash_data_t *cch) static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl) { u64 size1,sa1,ea1; - - if ( tlb->rid != rid || tlb->cl != cl ) + if ( tlb->rid != rid ||(!tlb->tc && tlb->cl != cl) ) return 0; size1 = PSIZE(tlb->ps); sa1 = tlb->vadr & ~(size1-1); // mask the low address bits @@ -89,7 +88,7 @@ __is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, { uint64_t size1,size2,sa1,ea1,ea2; - if ( entry->invalid || entry->rid != rid || entry->cl != cl ) { + if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) { return 0; } size1=PSIZE(entry->ps); @@ -292,8 +291,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb, if ( mfn == INVALID_MFN ) return 0; // TODO with machine 
discontinuous address space issue. - vhpt->etag = (hcb->vs->tag_func)( hcb->pta, - tlb->vadr, tlb->rid, tlb->ps); + vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr); //vhpt->ti = 0; vhpt->itir = tlb->itir & ~ITIR_RV_MASK; vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK; @@ -331,6 +329,17 @@ void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx) rep_tr(hcb, entry, idx); return ; } +thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry) +{ + thash_data_t *cch; + + cch = cch_alloc(hcb); + if(cch == NULL){ + thash_purge_all(hcb); + } + return cch; +} + thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry) { @@ -365,15 +374,14 @@ void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) u64 gppn; u64 ppns, ppne; - hash_table = (hcb->hash_func)(hcb->pta, - va, entry->rid, entry->ps); + hash_table = (hcb->hash_func)(hcb->pta, va); if( INVALID_ENTRY(hcb, hash_table) ) { *hash_table = *entry; hash_table->next = 0; } else { // TODO: Add collision chain length limitation. 
- cch = __alloc_chain(hcb,entry); + cch = vtlb_alloc_chain(hcb,entry); if(cch == NULL){ *hash_table = *entry; hash_table->next = 0; @@ -415,8 +423,7 @@ static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) { panic("Can't convert to machine VHPT entry\n"); } - hash_table = (hcb->hash_func)(hcb->pta, - va, entry->rid, entry->ps); + hash_table = (hcb->hash_func)(hcb->pta, va); if( INVALID_ENTRY(hcb, hash_table) ) { *hash_table = vhpt_entry; hash_table->next = 0; @@ -581,9 +588,7 @@ static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, priv->rid = rid; vrr = (hcb->get_rr_fn)(hcb->vcpu,va); priv->ps = vrr.ps; - hash_table = (hcb->hash_func)(hcb->pta, - priv->_curva, rid, priv->ps); - + hash_table = (hcb->hash_func)(hcb->pta, priv->_curva); priv->s_sect = s_sect; priv->cl = cl; priv->_tr_idx = 0; @@ -605,11 +610,8 @@ static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, priv->rid = rid; vrr = (hcb->get_rr_fn)(hcb->vcpu,va); priv->ps = vrr.ps; - hash_table = (hcb->hash_func)( hcb->pta, - priv->_curva, rid, priv->ps); - tag = (hcb->vs->tag_func)( hcb->pta, - priv->_curva, rid, priv->ps); - + hash_table = (hcb->hash_func)( hcb->pta, priv->_curva); + tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva); priv->tag = tag; priv->hash_base = hash_table; priv->cur_cch = hash_table; @@ -671,8 +673,7 @@ static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb) } } priv->_curva += rr_psize; - priv->hash_base = (hcb->hash_func)( hcb->pta, - priv->_curva, priv->rid, priv->ps); + priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva); priv->cur_cch = priv->hash_base; } return NULL; @@ -697,10 +698,8 @@ static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb) } } priv->_curva += rr_psize; - priv->hash_base = (hcb->hash_func)( hcb->pta, - priv->_curva, priv->rid, priv->ps); - priv->tag = (hcb->vs->tag_func)( hcb->pta, - priv->_curva, priv->rid, priv->ps); + priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva); 
+ priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva); priv->cur_cch = priv->hash_base; } return NULL; @@ -771,7 +770,26 @@ void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in) #endif (hcb->ins_hash)(hcb, in, in->vadr); } - +/* + * Purge one hash line (include the entry in hash table). + * Can only be called by thash_purge_all. + * Input: + * hash: The head of collision chain (hash table) + * + */ +static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash) +{ + if ( INVALID_ENTRY(hcb, hash) ) return; + thash_data_t *prev, *next; + next=hash->next; + while ( next ) { + prev=next; + next=next->next; + cch_free(hcb, prev); + } + // Then hash table itself. + INVALIDATE_HASH(hcb, hash); +} /* * Purge all TCs or VHPT entries including those in Hash table. * @@ -792,10 +810,17 @@ void thash_purge_all(thash_cb_t *hcb) #endif hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz); - for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) { - thash_rem_line(hcb, hash_table); + thash_purge_line(hcb, hash_table); + } + if(hcb->ht== THASH_TLB) { + hcb = hcb->ts->vhpt; + hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz); + for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) { + thash_purge_line(hcb, hash_table); + } } + local_flush_tlb_all(); } @@ -826,7 +851,7 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, if ( cch ) return cch; vrr = (hcb->get_rr_fn)(hcb->vcpu,va); - hash_table = (hcb->hash_func)( hcb->pta,va, rid, vrr.ps); + hash_table = (hcb->hash_func)( hcb->pta, va); if ( INVALID_ENTRY(hcb, hash_table ) ) return NULL; @@ -893,7 +918,7 @@ void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry) s_sect.v = 0; thash_purge_entries(hcb->ts->vhpt, entry, s_sect); - machine_tlb_purge(entry->rid, entry->vadr, entry->ps); + machine_tlb_purge(entry->vadr, entry->ps); } /* diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h index ff68af1465..429c79b4dc 100644 --- a/xen/include/asm-ia64/vmmu.h +++ 
b/xen/include/asm-ia64/vmmu.h @@ -148,8 +148,8 @@ typedef union thash_cch_mem { /* * Use to calculate the HASH index of thash_data_t. */ -typedef u64 *(THASH_FN)(PTA pta, u64 va, u64 rid, u64 ps); -typedef u64 *(TTAG_FN)(PTA pta, u64 va, u64 rid, u64 ps); +typedef u64 *(THASH_FN)(PTA pta, u64 va); +typedef u64 *(TTAG_FN)(PTA pta, u64 va); typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages); typedef void *(REM_NOTIFIER_FN)(struct hash_cb *hcb, thash_data_t *entry); typedef void (RECYCLE_FN)(struct hash_cb *hc, u64 para); @@ -329,8 +329,8 @@ extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, in #define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3) #define PAGE_FLAGS_RV_MASK (0x2 | (0x3UL<<50)|(((1UL<<11)-1)<<53)) -extern u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps); -extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps); +extern u64 machine_ttag(PTA pta, u64 va); +extern u64 machine_thash(PTA pta, u64 va); extern void purge_machine_tc_by_domid(domid_t domid); extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb); extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va); -- 2.30.2